Previously only vcpu0 could sync interrupts from qemu.
Now all vcpus can sync them.
Signed-off-by: Anthony Xu <anthony.xu@intel.com>
memset(&d->shared_info->evtchn_mask[0], 0xff,
sizeof(d->shared_info->evtchn_mask));
+ /* initialize the spinlock used for virq passing */
+ spin_lock_init(&d->arch.arch_vmx.virq_assist_lock);
+
/* Initialize the virtual interrupt lines */
vmx_virq_line_init(d);
{
#ifdef V_IOSAPIC_READY
/* Confirm virtual interrupt line signals, and set pending bits in vpd */
- if(v->vcpu_id==0)
+ if (spin_trylock(&v->domain->arch.arch_vmx.virq_assist_lock)) {
vmx_virq_line_assist(v);
+ spin_unlock(&v->domain->arch.arch_vmx.virq_assist_lock);
+ }
#endif
return;
}
atomic64_t shadow_fault_count;
struct last_vcpu last_vcpu[NR_CPUS];
+
+ struct arch_vmx_domain arch_vmx; /* Virtual Machine Extensions */
};
#define INT_ENABLE_OFFSET(v) \
(sizeof(vcpu_info_t) * (v)->vcpu_id + \
#include <asm/vtm.h>
#include <asm/vmx_platform.h>
#include <public/xen.h>
+#include <xen/spinlock.h>
#define VPD_SHIFT 17 /* 128K requirement */
#define VPD_SIZE (1 << VPD_SHIFT)
};
#define IVT_DEBUG_MAX 128
#endif
+
+struct arch_vmx_domain {
+ spinlock_t virq_assist_lock; /* spinlock for virq passing */
+};
+
struct arch_vmx_struct {
// vpd_t *vpd;
vtime_t vtm;